3e5a4e67w_DWgjIJ17Tlossu1LGujQ xenolinux-2.4.25-sparse/include/asm-xeno/highmem.h
3e5a4e67YtcyDLQsShhCfQwPSELfvA xenolinux-2.4.25-sparse/include/asm-xeno/hw_irq.h
3e5a4e677VBavzM1UZIEcH1B-RlXMA xenolinux-2.4.25-sparse/include/asm-xeno/hypervisor.h
+4060044fVx7-tokvNLKBf_6qBB4lqQ xenolinux-2.4.25-sparse/include/asm-xeno/io.h
3e5a4e673p7PEOyHFm3nHkYX6HQYBg xenolinux-2.4.25-sparse/include/asm-xeno/irq.h
3ead095db_LRUXnxaqs0dA1DWhPoQQ xenolinux-2.4.25-sparse/include/asm-xeno/keyboard.h
3e5a4e678ddsQOpbSiRdy1GRcDc9WA xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h
if ( !sched_rem_domain(p) )
return;
- printk("Killing domain %llu\n", p->domain);
+ DPRINTK("Killing domain %llu\n", p->domain);
unlink_blkdev_info(p);
ASSERT(p->state == TASK_DYING);
ASSERT(!p->has_cpu);
- printk("Releasing task %llu\n", p->domain);
+ DPRINTK("Releasing task %llu\n", p->domain);
/*
* This frees up blkdev rings and vbd-access lists. Totally safe since
 * size of the region, is faked out by a very simple state machine,
* preventing direct writes to the PCI config registers by a guest.
*
- * XXX Some comment on IRQ handling
+ * Interrupt handling is currently done in a very cheesy fashion.
+ * We take the default irq controller code and replace it with our own.
+ * If an interrupt comes in, it is acked using the PIC's normal routine. Then
+ * an event is sent to the receiving domain, which has to explicitly call back
+ * once it is finished dealing with the interrupt. Only then is the PIC's end
+ * handler called. Very cheesy, with all sorts of problems, but it seems
+ * to work in normal cases. No shared interrupts are allowed.
+ *
+ * XXX this code is not SMP safe at the moment!
*/
/* an array of device descriptors index by IRQ number */
static phys_dev_t *irqs[MAX_IRQS];
+/*
+ *
+ * General functions
+ *
+ */
+
/* find a device on the device list */
static phys_dev_t *find_pdev(struct task_struct *p, struct pci_dev *dev)
{
return 0;
}
+/*
+ *
+ * PCI config space access
+ *
+ */
/*
* Base address registers contain the base address for IO regions.
if ( res->flags & IORESOURCE_MEM )
{
+ /* this is written out explicitly for clarity */
*val = 0xffffffff;
/* bit 0 = 0 */
/* bit 21 = memory type */
func, reg, len, &val);
return ret;
break;
+#if 0
+ case 0xe0: /* XXX some device drivers seem to write to this.... */
+ printk("pci write hack allowed %02x:%02x:%02x: "
+ "reg=0x%02x len=0x%02x val=0x%08x\n",
+ bus, dev, func, reg, len, val);
+ break;
+#endif
default:
//if ( pdev->flags != ACC_WRITE )
/* XXX for debug we disallow all write access */
return ret;
}
+/*
+ *
+ * Interrupt handling
+ *
+ */
+
/*
* return the IRQ xen assigned to the device.
return;
}
- //printk("irq %d pdev=%p\n", irq, pdev);
-
p = pdev->owner;
- //printk("owner %p\n", p);
-
if ( test_bit(irq, &p->shared_info->physirq_pend) )
{
- printk("irq %d already delivered to guest\n", irq);
+ /* Some interrupt already delivered to guest */
return;
}
+
/* notify guest */
set_bit(irq, &p->shared_info->physirq_pend);
set_bit(ST_IRQ_DELIVERED, &pdev->state);
- cpu_mask |= mark_guest_event(p, _EVENT_TIMER);
+ cpu_mask |= mark_guest_event(p, _EVENT_PHYSIRQ);
guest_event_notify(cpu_mask);
}
/* this is called instead of the PICs original end handler.
- * the real end handler is only called once the guest ack'ed the handling
+ * the real end handler is only called once the guest signalled the handling
* of the event. */
static void end_virt_irq (unsigned int i)
{
return -EINVAL;
}
- printk("pdev= %p\n", pdev);
-
if ( irq >= MAX_IRQS )
{
printk("requested IRQ to big %d\n", irq);
printk ("setup handler %d\n", irq);
- /* request the IRQ. this is not shared! */
- err = request_irq(irq, phys_dev_interrupt, 0, "network", (void *)pdev);
+ /* request the IRQ. this is not shared and we use a slow handler! */
+ err = request_irq(irq, phys_dev_interrupt, SA_INTERRUPT,
+ "foo", (void *)pdev);
if ( err )
{
printk("error requesting irq\n");
static long pci_free_irq(int irq)
{
- /* XXX restore original handler and free_irq() */
+ phys_dev_t *pdev;
+
+ if ( irq >= MAX_IRQS )
+ {
+ printk("requested IRQ to big %d\n", irq);
+ return -EINVAL;
+ }
+
+ if ( irqs[irq] == NULL )
+ {
+ printk ("irq not used %d\n", irq);
+ return -EINVAL;
+ }
+
+ pdev = irqs[irq];
+
+ /* shutdown IRQ */
+ free_irq(irq, (void *)pdev);
+
+ /* restore irq controller */
+ irq_desc[irq].handler = pdev->orig_handler;
+
+ /* clean up */
+ pdev->orig_handler = NULL;
+ irqs[irq] = NULL;
+ kfree(pdev->new_handler);
+ pdev->new_handler = NULL;
+
+ printk("freed irq %d", irq);
return 0;
}
return 0;
}
+
/*
* demux hypervisor call.
*/
} rx_entry_t;
-#define TX_RING_SIZE 256
-#define RX_RING_SIZE 256
+#define XENNET_TX_RING_SIZE 256
+#define XENNET_RX_RING_SIZE 256
#define MAX_DOMAIN_VIFS 8
/* This structure must fit in a memory page. */
typedef struct net_ring_st
{
- tx_entry_t tx_ring[TX_RING_SIZE];
- rx_entry_t rx_ring[RX_RING_SIZE];
+ tx_entry_t tx_ring[XENNET_TX_RING_SIZE];
+ rx_entry_t rx_ring[XENNET_RX_RING_SIZE];
} net_ring_t;
/*
* size of the ring buffer. The following macros convert a free-running counter
* into a value that can directly index a ring-buffer array.
*/
-#define MASK_NET_RX_IDX(_i) ((_i)&(RX_RING_SIZE-1))
-#define MASK_NET_TX_IDX(_i) ((_i)&(TX_RING_SIZE-1))
+#define MASK_NET_RX_IDX(_i) ((_i)&(XENNET_RX_RING_SIZE-1))
+#define MASK_NET_TX_IDX(_i) ((_i)&(XENNET_TX_RING_SIZE-1))
typedef struct net_idx_st
{
extern struct net_device *the_dev;
-/*
- * shadow ring structures are used to protect the descriptors from
- * tampering after they have been passed to the hypervisor.
- *
- * TX_RING_SIZE and RX_RING_SIZE are defined in the shared network.h.
+/*
+ * shadow ring structures are used to protect the descriptors from tampering
+ * after they have been passed to the hypervisor.
+ *
+ * XENNET_TX_RING_SIZE and XENNET_RX_RING_SIZE are defined in the shared
+ * network.h.
*/
typedef struct rx_shadow_entry_st
net_idx_t *shared_idxs;
/* The private rings and indexes. */
- rx_shadow_entry_t rx_shadow_ring[RX_RING_SIZE];
+ rx_shadow_entry_t rx_shadow_ring[XENNET_RX_RING_SIZE];
NET_RING_IDX rx_prod; /* More buffers for filling go here. */
NET_RING_IDX rx_cons; /* Next buffer to fill is here. */
- tx_shadow_entry_t tx_shadow_ring[TX_RING_SIZE];
+ tx_shadow_entry_t tx_shadow_ring[XENNET_TX_RING_SIZE];
NET_RING_IDX tx_prod; /* More packets for sending go here. */
NET_RING_IDX tx_cons; /* Next packet to send is here. */
again:
for ( i = vif->tx_req_cons;
(i != shared_idxs->tx_req_prod) &&
- ((i-vif->tx_resp_prod) != TX_RING_SIZE);
+ ((i-vif->tx_resp_prod) != XENNET_TX_RING_SIZE);
i++ )
{
tx = shared_rings->tx_ring[MASK_NET_TX_IDX(i)].req;
j = vif->rx_prod;
for ( i = vif->rx_req_cons;
(i != shared_idxs->rx_req_prod) &&
- ((i-vif->rx_resp_prod) != RX_RING_SIZE);
+ ((i-vif->rx_resp_prod) != XENNET_RX_RING_SIZE);
i++ )
{
rx = shared_rings->rx_ring[MASK_NET_RX_IDX(i)].req;
spin_lock(&vif->rx_lock);
for ( i = vif->rx_req_cons;
(i != shared_idxs->rx_req_prod) &&
- ((i-vif->rx_resp_prod) != RX_RING_SIZE);
+ ((i-vif->rx_resp_prod) != XENNET_RX_RING_SIZE);
i++ )
{
make_rx_response(vif, shared_rings->rx_ring[MASK_NET_RX_IDX(i)].req.id,
spin_lock(&vif->tx_lock);
for ( i = vif->tx_req_cons;
(i != shared_idxs->tx_req_prod) &&
- ((i-vif->tx_resp_prod) != TX_RING_SIZE);
+ ((i-vif->tx_resp_prod) != XENNET_TX_RING_SIZE);
i++ )
{
make_tx_response(vif, shared_rings->tx_ring[MASK_NET_TX_IDX(i)].req.id,
bool 'PCI support' CONFIG_PCI
if [ "$CONFIG_PCI" = "y" ]; then
tristate ' 3c590/3c900 series (592/595/597) "Vortex/Boomerang" support' CONFIG_VORTEX
+ tristate 'Intel(R) PRO/1000 Gigabit Ethernet support' CONFIG_E1000
+ if [ "$CONFIG_E1000" != "n" ]; then
+ bool ' Use Rx Polling (NAPI)' CONFIG_E1000_NAPI
+ fi
fi
source drivers/pci/Config.in
* {tx,rx}_skbs store outstanding skbuffs. The first entry in each
* array is an index into a chain of free entries.
*/
- struct sk_buff *tx_skbs[TX_RING_SIZE+1];
- struct sk_buff *rx_skbs[RX_RING_SIZE+1];
+ struct sk_buff *tx_skbs[XENNET_TX_RING_SIZE+1];
+ struct sk_buff *rx_skbs[XENNET_RX_RING_SIZE+1];
};
/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
memset(np->net_idx, 0, sizeof(*np->net_idx));
/* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
- for ( i = 0; i <= TX_RING_SIZE; i++ )
+ for ( i = 0; i <= XENNET_TX_RING_SIZE; i++ )
np->tx_skbs[i] = (void *)(i+1);
- for ( i = 0; i <= RX_RING_SIZE; i++ )
+ for ( i = 0; i <= XENNET_RX_RING_SIZE; i++ )
np->rx_skbs[i] = (void *)(i+1);
wmb();
}
while ( prod != np->net_idx->tx_resp_prod );
- if ( np->tx_full && ((np->net_idx->tx_req_prod - prod) < TX_RING_SIZE) )
+ if ( np->tx_full &&
+ ((np->net_idx->tx_req_prod - prod) < XENNET_TX_RING_SIZE) )
{
np->tx_full = 0;
if ( np->state == STATE_ACTIVE )
netop_t netop;
NET_RING_IDX i = np->net_idx->rx_req_prod;
- if ( unlikely((i - np->rx_resp_cons) == RX_RING_SIZE) ||
+ if ( unlikely((i - np->rx_resp_cons) == XENNET_RX_RING_SIZE) ||
unlikely(np->state != STATE_ACTIVE) )
return;
np->rx_bufs_to_notify++;
}
- while ( (++i - np->rx_resp_cons) != RX_RING_SIZE );
+ while ( (++i - np->rx_resp_cons) != XENNET_RX_RING_SIZE );
/*
* We may have allocated buffers which have entries outstanding in the page
np->net_idx->rx_event = np->rx_resp_cons + 1;
/* Batch Xen notifications. */
- if ( np->rx_bufs_to_notify > (RX_RING_SIZE/4) )
+ if ( np->rx_bufs_to_notify > (XENNET_RX_RING_SIZE/4) )
{
netop.cmd = NETOP_PUSH_BUFFERS;
netop.vif = np->idx;
network_tx_buf_gc(dev);
- if ( (i - np->tx_resp_cons) == (TX_RING_SIZE - 1) )
+ if ( (i - np->tx_resp_cons) == (XENNET_TX_RING_SIZE - 1) )
{
np->tx_full = 1;
netif_stop_queue(dev);
* {tx,rx}_skbs store outstanding skbuffs. The first entry in each
* array is an index into a chain of free entries.
*/
- struct sk_buff *tx_skbs[TX_RING_SIZE+1];
- struct sk_buff *rx_skbs[RX_RING_SIZE+1];
+ struct sk_buff *tx_skbs[XENNET_TX_RING_SIZE+1];
+ struct sk_buff *rx_skbs[XENNET_RX_RING_SIZE+1];
};
/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
memset(np->net_idx, 0, sizeof(*np->net_idx));
/* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
- for ( i = 0; i <= TX_RING_SIZE; i++ )
+ for ( i = 0; i <= XENNET_TX_RING_SIZE; i++ )
np->tx_skbs[i] = (void *)(i+1);
- for ( i = 0; i <= RX_RING_SIZE; i++ )
+ for ( i = 0; i <= XENNET_RX_RING_SIZE; i++ )
np->rx_skbs[i] = (void *)(i+1);
wmb();
}
while ( prod != np->net_idx->tx_resp_prod );
- if ( np->tx_full && ((np->net_idx->tx_req_prod - prod) < TX_RING_SIZE) )
+ if ( np->tx_full &&
+ ((np->net_idx->tx_req_prod - prod) < XENNET_TX_RING_SIZE) )
{
np->tx_full = 0;
if ( np->state == STATE_ACTIVE )
netop_t netop;
NET_RING_IDX i = np->net_idx->rx_req_prod;
- if ( unlikely((i - np->rx_resp_cons) == RX_RING_SIZE) ||
+ if ( unlikely((i - np->rx_resp_cons) == XENNET_RX_RING_SIZE) ||
unlikely(np->state != STATE_ACTIVE) )
return;
np->rx_bufs_to_notify++;
}
- while ( (++i - np->rx_resp_cons) != RX_RING_SIZE );
+ while ( (++i - np->rx_resp_cons) != XENNET_RX_RING_SIZE );
/*
* We may have allocated buffers which have entries outstanding in the page
np->net_idx->rx_event = np->rx_resp_cons + 1;
/* Batch Xen notifications. */
- if ( np->rx_bufs_to_notify > (RX_RING_SIZE/4) )
+ if ( np->rx_bufs_to_notify > (XENNET_RX_RING_SIZE/4) )
{
netop.cmd = NETOP_PUSH_BUFFERS;
netop.vif = np->idx;
network_tx_buf_gc(dev);
- if ( (i - np->tx_resp_cons) == (TX_RING_SIZE - 1) )
+ if ( (i - np->tx_resp_cons) == (XENNET_TX_RING_SIZE - 1) )
{
np->tx_full = 1;
netif_stop_queue(dev);
*/
#include <linux/config.h>
-#include <asm/atomic.h>
#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <asm/atomic.h>
#include <asm/hypervisor.h>
#include <asm/system.h>
#include <asm/ptrace.h>
static unsigned long event_mask = 0;
+asmlinkage unsigned int do_physirq(int irq, struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ unsigned long irqs;
+ shared_info_t *shared = HYPERVISOR_shared_info;
+
+ /* do this manually */
+ kstat.irqs[cpu][irq]++;
+ ack_hypervisor_event(irq);
+
+ barrier();
+ irqs = xchg(&shared->physirq_pend, 0);
+
+ __asm__ __volatile__ (
+ " push %1 ;"
+ " sub $4,%%esp ;"
+ " jmp 3f ;"
+ "1: btrl %%eax,%0 ;" /* clear bit */
+ " mov %%eax,(%%esp) ;"
+ " call do_IRQ ;" /* do_IRQ(event) */
+ "3: bsfl %0,%%eax ;" /* %eax == bit # */
+ " jnz 1b ;"
+ " add $8,%%esp ;"
+ /* we use %ebx because it is callee-saved */
+ : : "b" (irqs), "r" (regs)
+ /* clobbered by callback function calls */
+ : "eax", "ecx", "edx", "memory" );
+
+ /* do this manually */
+ end_hypervisor_event(irq);
+
+ return 0;
+}
+
void do_hypervisor_callback(struct pt_regs *regs)
{
unsigned long events, flags;
events = xchg(&shared->events, 0);
events &= event_mask;
+ if ( (events & EVENT_PHYSIRQ) != 0 )
+ {
+ do_physirq(_EVENT_PHYSIRQ, regs);
+ events &= ~EVENT_PHYSIRQ;
+ }
+
__asm__ __volatile__ (
" push %1 ;"
" sub $4,%%esp ;"
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/pm.h>
-//XXX ??? #include <linux/pci.h>
+#include <linux/pci.h>
#include <linux/apm_bios.h>
#include <linux/kernel.h>
#include <linux/string.h>
EXPORT_SYMBOL(apm_info);
//EXPORT_SYMBOL(gdt);
EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(phys_to_machine_mapping);
+
#ifdef CONFIG_DEBUG_IOVIRT
EXPORT_SYMBOL(__io_virt_debug);
EXPORT_SYMBOL(__generic_copy_to_user);
EXPORT_SYMBOL(strnlen_user);
+
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pcibios_penalize_isa_irq);
+EXPORT_SYMBOL(pci_mem_start);
+#endif
+
+
#ifdef CONFIG_X86_USE_3DNOW
EXPORT_SYMBOL(_mmx_memcpy);
EXPORT_SYMBOL(mmx_clear_page);
printk("startup_physirq_event %d\n", irq);
/*
- * install a interrupt handler for physirq event when called thefirst tim
+ * install an interrupt handler for the physirq event when called the first
+ * time. We actually never execute the handler, as _EVENT_PHYSIRQ is
+ * handled specially in hypervisor.c. But we need to enable the event etc.
*/
if ( !setup_event_handler )
{
}
return 0;
}
+/*
+ * This is a dummy interrupt handler.
+ * It should never be called. Events for physical interrupts are handled
+ * differently in hypervisor.c
+ */
+static void physirq_interrupt(int irq, void *unused, struct pt_regs *ptregs)
+{
+ printk("XXX This should never be called!");
+}
+
+/*
+ * IRQ is not needed anymore.
+ */
static void shutdown_physirq_event(unsigned int irq)
{
+ physdev_op_t op;
+ int err;
- /* call xen to free IRQ */
+ printk("shutdown_phys_irq called.");
+ /*
+ * tell hypervisor
+ */
+ op.cmd = PHYSDEVOP_FREE_IRQ;
+ op.u.free_irq.irq = irq;
+ if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
+ {
+ printk(KERN_ALERT "could not free IRQ %d\n", irq);
+ return;
+ }
+ return;
}
static void enable_physirq_event(unsigned int irq)
{
- /* XXX just enable all interrupts for now */
+ /* XXX just enable all phys interrupts for now */
+ enable_irq(HYPEREVENT_IRQ(_EVENT_PHYSIRQ));
}
static void disable_physirq_event(unsigned int irq)
{
- /* XXX just disable all interrupts for now */
+ /* XXX just disable all phys interrupts for now */
+ disable_irq(HYPEREVENT_IRQ(_EVENT_PHYSIRQ));
}
static void ack_physirq_event(unsigned int irq)
{
int err;
physdev_op_t op;
+
/* call hypervisor */
op.cmd = PHYSDEVOP_FINISHED_IRQ;
op.u.finished_irq.irq = irq;
};
-/*
- * this interrupt handler demuxes the virt phys event and the virt phys
- * bitmask and calls the interrupt handlers for virtualised physical interrupts
- */
-static void physirq_interrupt(int irq, void *unused, struct pt_regs *ptregs)
-{
-#if 0
- unsigned long flags;
- int virq;
- local_irq_save(flags);
- do_IRQ(virq);
- local_irq_restore(flags);
-#endif
-}
-
void __init physirq_init(void)
{
--- /dev/null
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/config.h>
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+ /*
+ * Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+ *
+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+ * isa_read[wl] and isa_write[wl] fixed
+ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ */
+
+#define IO_SPACE_LIMIT 0xffff
+
+#define XQUAD_PORTIO_BASE 0xfe400000
+#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
+#define XQUAD_PORTIO_LEN 0x80000 /* Only remapping first 2 quads */
+
+#ifdef __KERNEL__
+
+#include <linux/vmalloc.h>
+
+/*
+ * Temporary debugging check to catch old code using
+ * unmapped ISA addresses. Will be removed in 2.4.
+ */
+#if CONFIG_DEBUG_IOVIRT
+ extern void *__io_virt_debug(unsigned long x, const char *file, int line);
+ extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
+ #define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
+//#define __io_phys(x) __io_phys_debug((unsigned long)(x), __FILE__, __LINE__)
+#else
+ #define __io_virt(x) ((void *)(x))
+//#define __io_phys(x) __pa(x)
+#endif
+
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa(address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline void * phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#ifdef CONFIG_HIGHMEM64G
+#define page_to_phys(page) ((u64)(page - mem_map) << PAGE_SHIFT)
+#else
+#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
+#endif
+
+extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ */
+
+static inline void * ioremap (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+/**
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many
+ * busses. In particular driver authors should read up on PCI writes
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable:
+ */
+
+static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, _PAGE_PCD);
+}
+
+extern void iounmap(void *addr);
+
+/*
+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ * A boot-time mapping is currently limited to at most 16 pages.
+ */
+extern void *bt_ioremap(unsigned long offset, unsigned long size);
+extern void bt_iounmap(void *addr, unsigned long size);
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
+#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
+#define page_to_bus(_x) phys_to_machine(page_to_phys(_x))
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
+#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
+#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+
+#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
+#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
+#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
+#define memset_io(a,b,c) __memset(__io_virt(a),(b),(c))
+#define memcpy_fromio(a,b,c) __memcpy((a),__io_virt(b),(c))
+#define memcpy_toio(a,b,c) __memcpy(__io_virt(a),(b),(c))
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite large):
+ */
+#define __ISA_IO_base ((char *)(PAGE_OFFSET))
+
+#define isa_readb(a) readb(__ISA_IO_base + (a))
+#define isa_readw(a) readw(__ISA_IO_base + (a))
+#define isa_readl(a) readl(__ISA_IO_base + (a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
+#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
+#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
+#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
+
+
+/*
+ * Again, i386 does not require mem IO specific function.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(b),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
+
+/**
+ * check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the mmio address io_addr. This
+ * address should have been obtained by ioremap.
+ * Returns 1 on a match.
+ */
+
+static inline int check_signature(unsigned long io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/**
+ * isa_check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the ISA mmio address io_addr.
+ * Returns 1 on a match.
+ *
+ * This function is deprecated. New drivers should use ioremap and
+ * check_signature.
+ */
+
+
+static inline int isa_check_signature(unsigned long io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (isa_readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/*
+ * Cache management
+ *
+ * This needed for two cases
+ * 1. Out of order aware processors
+ * 2. Accidentally out of order processors (PPro errata #51)
+ */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+static inline void flush_write_buffers(void)
+{
+ __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+}
+
+#define dma_cache_inv(_start,_size) flush_write_buffers()
+#define dma_cache_wback(_start,_size) flush_write_buffers()
+#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
+
+#else
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+#define flush_write_buffers()
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
+#else
+#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
+#endif
+
+#ifdef REALLY_SLOW_IO
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#else
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+
+#ifdef CONFIG_MULTIQUAD
+extern void *xquad_portio; /* Where the IO area was mapped */
+#endif /* CONFIG_MULTIQUAD */
+
+/*
+ * Talk about misusing macros..
+ */
+#define __OUT1(s,x) \
+static inline void out##s(unsigned x value, unsigned short port) {
+
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+
+#if defined (CONFIG_MULTIQUAD) && !defined(STANDALONE)
+#define __OUTQ(s,ss,x) /* Do the equivalent of the portio op on quads */ \
+static inline void out##ss(unsigned x value, unsigned short port) { \
+ if (xquad_portio) \
+ write##s(value, (unsigned long) xquad_portio + port); \
+ else /* We're still in early boot, running on quad 0 */ \
+ out##ss##_local(value, port); \
+} \
+static inline void out##ss##_quad(unsigned x value, unsigned short port, int quad) { \
+ if (xquad_portio) \
+ write##s(value, (unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
+ + port); \
+}
+
+#define __INQ(s,ss) /* Do the equivalent of the portio op on quads */ \
+static inline RETURN_TYPE in##ss(unsigned short port) { \
+ if (xquad_portio) \
+ return read##s((unsigned long) xquad_portio + port); \
+ else /* We're still in early boot, running on quad 0 */ \
+ return in##ss##_local(port); \
+} \
+static inline RETURN_TYPE in##ss##_quad(unsigned short port, int quad) { \
+ if (xquad_portio) \
+ return read##s((unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
+ + port); \
+ else\
+ return 0;\
+}
+#endif /* CONFIG_MULTIQUAD && !STANDALONE */
+
+#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}
+#else
+/* Make the default portio routines operate on quad 0 */
+#define __OUT(s,s1,x) \
+__OUT1(s##_local,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p_local,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
+__OUTQ(s,s,x) \
+__OUTQ(s,s##_p,x)
+#endif /* !CONFIG_MULTIQUAD || STANDALONE */
+
+#define __IN1(s) \
+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+
+#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
+#else
+/* Make the default portio routines operate on quad 0 */
+#define __IN(s,s1,i...) \
+__IN1(s##_local) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p_local) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__INQ(s,s) \
+__INQ(s,s##_p)
+#endif /* !CONFIG_MULTIQUAD || STANDALONE */
+
+#define __INS(s) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; ins" #s \
+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define __OUTS(s) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; outs" #s \
+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define RETURN_TYPE unsigned char
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+__INS(b)
+__INS(w)
+__INS(l)
+
+__OUTS(b)
+__OUTS(w)
+__OUTS(l)
+
+#endif
ln -sf ../asm-i386/hdreg.h
ln -sf ../asm-i386/i387.h
ln -sf ../asm-i386/ide.h
-ln -sf ../asm-i386/init.h
-ln -sf ../asm-i386/io.h
+ln -sf ../asm-i386/init.h
ln -sf ../asm-i386/io_apic.h
ln -sf ../asm-i386/ioctl.h
ln -sf ../asm-i386/ioctls.h